0 General functions and parameters

0.1 Hyperparameters

In [1]:
# HYPERPARAMETERS for image thresholding
# (tuning knobs for the binary lane-pixel extraction; the same defaults are
# duplicated as keyword defaults of img_to_thresh_bin further down)

# Color channel: band of accepted S-channel (HLS) values
s_thresh_min = 170
s_thresh_max = 255

# Sobel x: accepted band of scaled absolute x-gradients and kernel size
sobx_thresh_min = 20
sobx_thresh_max = 100
sobx_kernel_size = 3

# Sobel y: accepted band of scaled absolute y-gradients and kernel size
soby_thresh_min = 20
soby_thresh_max = 100
soby_kernel_size = 3

# Magnitude gradient: accepted band of scaled gradient magnitudes and kernel size
mag_thresh_min = 20
mag_thresh_max = 100
mag_kernel_size = 3

# Direction gradient (radians); the larger kernel smooths the noisy direction estimate
dir_thresh_min = 0.4
dir_thresh_max = 0.8
dir_kernel_size = 15

0.2 Plotting functions

In [2]:
def plot_images_from_dict(input_dictionary, images_per_line = 5, figw = 15, figh = 15, save = False):
    """
    Plots all images of a dictionary into the jupyter notebook
    
    Input:
    input_dictionary (dict): Input a dictionary with file paths as keys and images as values
    images_per_line (int): Defines how many images will be displayed per line
    figw (int): Defines the width of the overall output figure
    figh (int): Defines the height of the overall output figure
    save (bool): Determines whether the images will be saved under the same image path with the filename extension 
                "_annotated" (useful if images were modified)    
    """
    import os

    # Define required number of lines and columns in plot to create subplots
    num_images = len(input_dictionary)
    images_per_column = int(math.ceil(num_images / images_per_line))
    
    # squeeze=False guarantees a 2D axes array, so .flat works even when the
    # grid collapses to a single subplot (previously a plain Axes was returned)
    fig, axes = plt.subplots(images_per_column, images_per_line, figsize=(figw, figh), squeeze=False)
    
    # Remove axis for all subplots (also the unused trailing ones)
    for ax in axes.flat:
        ax.axis("off")
    
    # Display all images in alphabetical order of their paths
    for ax, image in zip(axes.flat, sorted(input_dictionary.keys())):
        ax.imshow(input_dictionary[image])
        ax.set_title(image)
        # Save all images if "save"-function was activated (to be used if images were modified before)
        if save:
            # os.path.splitext is robust against dots in directory names,
            # unlike slicing at the first "." of the full path
            img_out_name = "{}_annotated.png".format(os.path.splitext(image)[0])
            plt.imsave(img_out_name, input_dictionary[image].astype(np.uint8))
        
    # Output plot with all images    
    plt.tight_layout()
    plt.show()
In [3]:
def plot_image_comparison(img_before, img_after, img_name, annotation,cmap_before=None,cmap_after=None):
    """
    Plots a side-by-side comparison between two images in the jupyter notebook
    
    Input:
    img_before (np.array): Input the initial image before the conversion is applied
    img_after (np.array): Input image after the conversion is applied
    img_name (string): Name of the image which should be displayed above the image description
    annotation (str): Input annotation to the image (conversion method)
    cmap_before: Optional matplotlib colormap for the left ("before") image
    cmap_after: Optional matplotlib colormap for the right ("after") image
    """
    # Fixed 1x2 layout: "before" on the left, "after" on the right
    fig, (ax_before, ax_after) = plt.subplots(1, 2, figsize=(13, 7))

    ax_before.imshow(img_before, cmap=cmap_before)
    ax_before.axis("off")
    ax_before.set_title("{}\nImage before {}".format(img_name, annotation))

    ax_after.imshow(img_after, cmap=cmap_after)
    ax_after.axis("off")
    ax_after.set_title("{}\nImage after {}".format(img_name, annotation))

    plt.tight_layout()
    plt.axis("off")
    plt.show()
    
import string
    
def save_image_incl_extension(img_after, initial_image_path, img_annotation):
    """
    Saves image after conversion to file
    
    Input:
    img_after (np.array): Image after conversion
    initial_image_path (str): Initial path from where the image is sourced
    img_annotation (str): Annotation to be added at the end of the initial image path
                          (spaces are replaced by underscores)
    """
    import os
    # os.path.splitext strips only the file extension; slicing at the first "."
    # (the previous approach) truncated paths whose directories contain dots
    stem = os.path.splitext(initial_image_path)[0]
    img_out_name = "{}_{}.png".format(stem, img_annotation.replace(" ", "_"))
    plt.imsave(img_out_name, img_after.astype(np.uint8))

1 Camera calibration

1.1 Load images and fit corners

In [4]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import math
%matplotlib inline

# Define number of chessboard inside corner points
chess_corner_x = 9
chess_corner_y = 6

# Read in and create a list of calibration images
calibration_images = glob.glob("camera_cal/calibration*.jpg")

# Add container for filenames of images with detected edges and non-detected edges
img_corner_det_true = {}
img_corner_det_false = {}

# Array containers to store object and image points from all images
obj_points = [] # 3D points in real world
img_points = [] # 2D points in image

# Prepare object points like (0,0,0), (1,0,0), (2,0,0), ...., (7,5,0)
# (z stays 0: the chessboard is assumed flat, corner spacing is the unit length)
objp = np.zeros((chess_corner_x * chess_corner_y,3), np.float32)
objp[:,:2] = np.mgrid[0:chess_corner_x,0:chess_corner_y].T.reshape(-1,2) # x, y coordinates  

for fname in calibration_images:
    # Read in each image (mpimg loads in RGB channel order)
    image = mpimg.imread(fname)
    
    # Convert image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    
    # Find chessboard corners
    ret, corners = cv2.findChessboardCorners(gray,(chess_corner_x,chess_corner_y), None)
    
    # If corner points are found, add object points, image points
    if ret == True:
        img_points.append(corners)
        obj_points.append(objp)
        
        # Draw and display the corners
        image = cv2.drawChessboardCorners(image,(chess_corner_x, chess_corner_y), corners, ret)
        
        # Append images with corners to dictionary of images
        img_corner_det_true[fname] = image        
    
    else:
        # Append images with no corners identified to dictionary of images
        img_corner_det_false[fname] = image   
# NOTE(review): `gray` intentionally leaks out of this loop and is reused by the
# calibration cell below (1.2) for the image size — fragile if cells are reordered.
In [5]:
# Return plot of all images with corners found (and save them to "..._annotated.png"-files) 
# -> To execute second part in parentheses set last function argument to "True" 
# (not activated as computation takes a few seconds) 
# Return plot of all images with corners found (and save them to "..._annotated.png"-files) 
# -> To execute second part in parentheses set last function argument to "True" 
# (not activated as computation takes a few seconds) 
print("\nAll images with identified corners:")
plot_images_from_dict(img_corner_det_true,3,15,20,False)

# Return plot of all images for which no corners have been identified (verify whether reason is that not all required corners are on the image)
print("\nAll images for which no corners could be identified:")
plot_images_from_dict(img_corner_det_false,3,15,6,False)
All images with identified corners:
All images for which no corners could be identified:

1.2 Calibrate camera

In [6]:
# Calibrate camera from all collected object/image point pairs.
# NOTE(review): `gray` is left over from the last iteration of the corner-detection
# loop above; this works because all calibration images share the same size, but it
# depends on hidden cell state — consider passing the image shape explicitly.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points,img_points, gray.shape[::-1],None,None)

1.3 Perform distortion correction

In [7]:
# Function for distortion correction on single image
def distortion_correction(img,mtx,dist):
    # Passing mtx again as newCameraMatrix keeps the original image scale/crop
    return cv2.undistort(img,mtx,dist,None,mtx)
In [8]:
# Fit an undistorted test image of the chessboard
# (calibration01.jpg had no detectable corners, so it sits in img_corner_det_false)
test_distorted = img_corner_det_false["camera_cal/calibration01.jpg"]
test_undistorted = distortion_correction(test_distorted,mtx,dist)
plot_image_comparison(test_distorted, test_undistorted, "camera_cal/calibration01.jpg","distortion correction",cmap_before=None,cmap_after=None)
plt.imsave("output_images/01_chess_dist.png",test_distorted)
# NOTE(review): file name says "dist" but this saves the UNdistorted image —
# "02_chess_undist.png" would be clearer
plt.imsave("output_images/02_chess_dist.png",test_undistorted)

2 Pipeline (test images)

2.0 Load images

In [9]:
import glob  # already imported in the calibration cell; redundant but harmless

# Collect the paths of all road test images
img_paths = glob.glob("test_images/*.jpg")
In [10]:
#img_path = "test_images/test1.jpg"
annotation = "distortion correction"
test_images = {}
# Read all test images into a dict keyed by file path (loop index i is unused)
for i, img_path in enumerate(img_paths):
    test_images[img_path] = mpimg.imread(img_path)
plt.imsave("output_images/03_original.png",test_images["test_images/test2.jpg"])

2.1 Distortion correction

In [11]:
# Undistort every test image and show a before/after comparison
test_images_dst = {}
for key in sorted(test_images.keys()):
    test_images_dst[key] = distortion_correction(test_images[key],mtx,dist)
    plot_image_comparison(test_images[key],test_images_dst[key],key, annotation,cmap_before=None,cmap_after=None)
plt.imsave("output_images/04_undistorted.png",test_images_dst["test_images/test2.jpg"])

2.2 Create thresholded binary image

2.2.1 Create S-channel based binary image

In [12]:
# Function for HLS color channel conversion
def RGB_to_HLS(img):
    # Input must be in RGB channel order (as loaded by mpimg.imread)
    return cv2.cvtColor(img,cv2.COLOR_RGB2HLS)

def HLS_to_S(img):
    """Extracts the S (saturation) channel — the third channel — of an HLS image."""
    saturation_channel = img[:, :, 2]
    return saturation_channel
In [13]:
# Convert to HLS color space and separate the S channel
# Note: dst is the undistorted image
# (the actual S-channel separation happens in the next cell; here only the HLS images are built)
test_images_hls = {}
for key in sorted(test_images_dst.keys()):
    test_images_hls[key] =RGB_to_HLS(test_images_dst[key])
In [14]:
# Extract the S channel of every HLS test image and show it next to the undistorted original
test_images_s_channel = {}
annotation = "S-channel"
for key in sorted(test_images_dst.keys()):
    test_images_s_channel[key] = HLS_to_S(test_images_hls[key])
    plot_image_comparison(test_images_dst[key],test_images_s_channel[key],key, annotation, cmap_before=None,
                          cmap_after="gray")
plt.imsave("output_images/05_s_channel.png",test_images_s_channel["test_images/test2.jpg"],cmap = "gray")
In [15]:
def S_to_thresh(img, s_thresh_min, s_thresh_max):
    """
    Builds a binary image from an S-channel image: pixels inside the inclusive
    band [s_thresh_min, s_thresh_max] become 1, all others 0.
    The output keeps the input's dtype.
    """
    in_band = (img >= s_thresh_min) & (img <= s_thresh_max)
    return in_band.astype(img.dtype)
In [16]:
# Threshold the S channel of every test image into a binary lane-pixel candidate image
test_images_s_thresh = {}
annotation = "S-binary-threshold"
for key in sorted(test_images_s_channel.keys()):
    test_images_s_thresh[key] = S_to_thresh(test_images_s_channel[key],s_thresh_min,s_thresh_max)
    plot_image_comparison(test_images_s_channel[key],test_images_s_thresh[key],key, annotation,cmap_before="gray",
                          cmap_after="gray")
plt.imsave("output_images/06_s_binary.png",test_images_s_thresh["test_images/test2.jpg"],cmap = "gray")

2.2.2 Create Sobel based binary image

In [17]:
# Define sobel threshold functions
def abs_sobel_thresh(image, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """
    Builds a binary image from the scaled absolute Sobel gradient in x or y.

    Input:
    image (np.array): RGB input image
    orient (str): 'x' or 'y' — direction of the derivative
    sobel_kernel (int): Sobel kernel size (odd)
    thresh (tuple): Inclusive (min, max) band of accepted scaled gradient values
    Output:
    np.array: uint8 binary image, 1 where the scaled gradient lies inside the band
    Raises:
    ValueError: if orient is neither 'x' nor 'y'
    """
    # Calculate directional gradient on the grayscale image
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    if orient == "x":
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel))
    elif orient == "y":
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel))
    else:
        # Previously an invalid orient crashed later with an UnboundLocalError;
        # fail fast with a clear message instead
        raise ValueError("orient must be 'x' or 'y', got {!r}".format(orient))
    # Scale to 0..255 so the threshold band is comparable across images
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # Apply threshold
    grad_binary = np.zeros_like(scaled_sobel)
    grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return grad_binary
In [18]:
# Apply the x-gradient threshold to every test image
test_images_abssob_x = {}
annotation = "Sobelx"
for key in sorted(test_images_dst.keys()):
    test_images_abssob_x[key] = abs_sobel_thresh(test_images_dst[key], orient='x', sobel_kernel= sobx_kernel_size, 
                                                 thresh=(sobx_thresh_min, sobx_thresh_max))    
    plot_image_comparison(test_images_dst[key],test_images_abssob_x[key],key, annotation,cmap_before=None,
                          cmap_after="gray")
plt.imsave("output_images/07_abssob_x.png",test_images_abssob_x["test_images/test2.jpg"],cmap = "gray")
In [19]:
# Apply the y-gradient threshold to every test image
test_images_abssob_y = {}
annotation = "Sobely"
for key in sorted(test_images_dst.keys()):
    test_images_abssob_y[key] = abs_sobel_thresh(test_images_dst[key], orient='y', sobel_kernel= soby_kernel_size, 
                                                 thresh=(soby_thresh_min, soby_thresh_max))    
    plot_image_comparison(test_images_dst[key],test_images_abssob_y[key],key, annotation,cmap_before=None,
                          cmap_after="gray")
plt.imsave("output_images/08_abssob_y.png",test_images_abssob_y["test_images/test2.jpg"],cmap = "gray")
In [20]:
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
    """
    Builds a binary image from the thresholded gradient magnitude sqrt(gx^2 + gy^2).

    Input:
    image (np.array): RGB input image
    sobel_kernel (int): Sobel kernel size (odd)
    mag_thresh (tuple): Inclusive (min, max) band of accepted scaled magnitudes
                        (note: the parameter shadows this function's name inside the body)
    Output:
    np.array: uint8 binary image, 1 where the scaled magnitude lies inside the band
    """
    # Calculate gradient magnitude on the grayscale image
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F,1,0, ksize = sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F,0,1, ksize = sobel_kernel)
    # BUG FIX: np.sqrt(a, b) treats b as the `out` array, so the original
    # np.sqrt(sobelx**2, sobely**2) silently ignored the y gradient entirely.
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # Rescale to 8 bit so the 0..255 threshold band applies
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    # Apply threshold
    mag_binary = np.zeros_like(gradmag)
    mag_binary[(gradmag >= mag_thresh[0])&(gradmag <= mag_thresh[1])] = 1    
    return mag_binary
In [21]:
# Apply the gradient-magnitude threshold to every test image
test_images_mag_grad = {}
annotation = "magnitude gradient"
for key in sorted(test_images_dst.keys()):
    test_images_mag_grad[key] = mag_thresh(test_images_dst[key], sobel_kernel= mag_kernel_size, 
                                           mag_thresh=(mag_thresh_min, mag_thresh_max))    
    plot_image_comparison(test_images_dst[key],test_images_mag_grad[key],key, annotation,cmap_before=None,
                          cmap_after="gray")
plt.imsave("output_images/09_mag_grad.png",test_images_mag_grad["test_images/test2.jpg"],cmap = "gray")
In [22]:
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Calculate gradient direction (radians) on the grayscale image
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F,1,0, ksize = sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F,0,1, ksize = sobel_kernel)
    # NOTE(review): argument order is arctan2(|sobelx|, |sobely|); the common
    # lane-finding convention is arctan2(|sobely|, |sobelx|). The thresholds
    # (0.4-0.8) appear tuned for this ordering, so it is left unchanged — confirm
    # before "fixing".
    absgraddir = np.arctan2(np.absolute(sobelx),np.absolute(sobely))
    # Apply threshold (output is float, like absgraddir)
    dir_binary = np.zeros_like(absgraddir)
    dir_binary[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    return dir_binary
In [23]:
# Apply the gradient-direction threshold to every test image
test_images_dir_grad = {}
annotation = "direction gradient"
for key in sorted(test_images_dst.keys()):
    test_images_dir_grad[key] = dir_threshold(test_images_dst[key], sobel_kernel=dir_kernel_size, 
                                              thresh=(dir_thresh_min, dir_thresh_max))    
    plot_image_comparison(test_images_dst[key],test_images_dir_grad[key],key, annotation,cmap_before=None,
                          cmap_after="gray")
plt.imsave("output_images/10_dir_grad.png",test_images_dir_grad["test_images/test2.jpg"],cmap = "gray")
In [24]:
def sob_to_sobcomb(img_gradx, img_grady, img_mag_grad, img_dir_grad):
    """
    Combines the four Sobel-based binaries into one: a pixel is 1 when both
    directional gradients fire, OR when both magnitude and direction fire.
    All four inputs must share the same shape; the output keeps img_gradx's dtype.
    """
    assert img_gradx.shape == img_grady.shape == img_mag_grad.shape == img_dir_grad.shape, "not all input images have the same shape"
    xy_active = (img_gradx == 1) & (img_grady == 1)
    mag_dir_active = (img_mag_grad == 1) & (img_dir_grad == 1)
    combined = np.zeros_like(img_gradx)
    combined[xy_active | mag_dir_active] = 1
    return combined
In [25]:
# Build the combined Sobel binary for every test image from the four partial binaries
test_images_sobcomb = {}
annotation = "combined sobel"
for key in sorted(test_images_dst.keys()):
    test_images_sobcomb[key] = sob_to_sobcomb(test_images_abssob_x[key],test_images_abssob_y[key],
                                              test_images_mag_grad[key],test_images_dir_grad[key])
    plot_image_comparison(test_images_dst[key],test_images_sobcomb[key],key, annotation,cmap_before=None,
                          cmap_after="gray")
plt.imsave("output_images/11_sobcomb.png",test_images_sobcomb["test_images/test2.jpg"],cmap = "gray")

2.2.3 Create overall combined binary image

In [26]:
def col_sob_comb(img_sobcomb,img_s_thresh):
    """
    Merges the combined Sobel binary and the S-channel binary with a pixelwise OR.
    Both inputs must share the same shape; the output keeps img_sobcomb's dtype.
    """
    assert img_sobcomb.shape == img_s_thresh.shape, "not all input images have the same shape"
    either_active = np.logical_or(img_sobcomb == 1, img_s_thresh == 1)
    combined_bin = np.zeros_like(img_sobcomb)
    combined_bin[either_active] = 1
    return combined_bin
In [27]:
# Build the overall combined binary and a color visualization for every test image
color_binary = {}
combined_binary = {}

for key in sorted(test_images.keys()):

    # Stack each channel to view their individual contributions in green and blue respectively
    # This returns a stack of the two binary images, whose components you can see as different colors
    color_binary[key] = np.dstack(( np.zeros_like(test_images_dir_grad[key]), test_images_sobcomb[key], 
                                   test_images_s_thresh[key]))

    # Combine the two binary thresholds
    combined_binary[key] = col_sob_comb(test_images_sobcomb[key],test_images_s_thresh[key])
    
plt.imsave("output_images/12_combined_binary.png",combined_binary["test_images/test2.jpg"],cmap = "gray")


# Plot the stacked-color view next to the combined binary for each image
for col,com in zip(sorted(color_binary.keys()),sorted(combined_binary.keys())):
    # Plotting thresholded images
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.set_title('Stacked thresholds')
    ax1.imshow(color_binary[col])

    ax2.set_title('Combined S channel and gradient thresholds')
    ax2.imshow(combined_binary[com], cmap='gray')

2.2.4 Create overall function for binary creation

In [28]:
# Function for creating thresholded binary image on single image (function summarizes all previous functions)
def img_to_thresh_bin(img,s_thresh_min=170, s_thresh_max=255, sobx_thresh_min=20, sobx_thresh_max=100, 
                      sobx_kernel_size=3, soby_thresh_min=20, soby_thresh_max = 100, soby_kernel_size = 3, 
                      mag_thresh_min = 20, mag_thresh_max = 100, mag_kernel_size = 3, dir_thresh_min = 0.4, 
                      dir_thresh_max = 0.8, dir_kernel_size = 15):
    """
    Builds the combined binary lane-pixel image for a single (undistorted) RGB image.

    Combines the S-channel threshold with the Sobel x/y, magnitude and direction
    gradient thresholds. The keyword defaults mirror the hyperparameter cell at
    the top of the notebook.

    Input:
    img (np.array): Undistorted RGB image
    remaining keywords: threshold bands and Sobel kernel sizes per method
    Output:
    np.array: Binary image with 1 wherever the combined Sobel OR the S-channel binary fires
    """

    # Create S-Channel binary pipeline
    hls = RGB_to_HLS(img)
    s = HLS_to_S(hls)
    s_thresh = S_to_thresh(s, s_thresh_min, s_thresh_max)
    
    # Create sobel binary pipeline
    sobelx = abs_sobel_thresh(img, orient='x', sobel_kernel= sobx_kernel_size, thresh=(sobx_thresh_min, sobx_thresh_max)) 
    sobely = abs_sobel_thresh(img, orient='y', sobel_kernel= soby_kernel_size, thresh=(soby_thresh_min, soby_thresh_max))
    mag_grad = mag_thresh(img, sobel_kernel= mag_kernel_size, mag_thresh=(mag_thresh_min, mag_thresh_max))
    dir_grad = dir_threshold(img, sobel_kernel=dir_kernel_size, thresh=(dir_thresh_min, dir_thresh_max))
    sob_comb = sob_to_sobcomb(sobelx,sobely,mag_grad,dir_grad)
    
    # Return overall combined binary
    return col_sob_comb(sob_comb,s_thresh)

2.3 Perform perspective transform

In [29]:
# Visual check of the chosen perspective-transform source points on a straight-lane image
# (the four dots mark the trapezoid that is mapped to a rectangle below)
plt.imshow(test_images_dst["test_images/straight_lines1.jpg"])
plt.axis("off")
plt.plot(255,688,".")
plt.plot(1051,688,".")
plt.plot(595,452,".")
plt.plot(686,452,".")
plt.savefig("output_images/13_src_points")
In [30]:
# Four source coordinates (trapezoid on the straight-lane image, picked visually above)
src = np.float32(
    [[255,688],
     [1051,688],
     [595,452],
     [686,452]])

# Four desired coordinates (rectangle in the warped bird's-eye view)
dst = np.float32(
    [[360,720],
     [946,720],
     [360,0],
     [946,0]])
In [31]:
def warp(img,src,dst):
    """
    Applies a perspective transform mapping the src quadrilateral onto dst.

    Input:
    img (np.array): Image to be warped
    src (np.float32): Four source coordinates
    dst (np.float32): Four destination coordinates
    Output:
    np.array: Warped image with the same width/height as the input
    """
    
    # Define calibration box in source (original) and destination (desired or warped) coordinates
    img_size = (img.shape[1],img.shape[0])    
    
    # Compute the perspective transform, M
    M = cv2.getPerspectiveTransform(src, dst)
    
    # Create warped image - uses linear interpolation
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    
    return warped   
In [32]:
# Ensure that example image is warped correctly by marking the destination points
example_warped = warp(test_images_dst["test_images/straight_lines1.jpg"],src,dst)
plt.imshow(example_warped)
plt.axis("off")
plt.plot(360,720,".")
# NOTE(review): 944 does not match the destination x=946 defined in `dst` above — verify
plt.plot(944,720,".")
plt.plot(360,0,".")
plt.plot(946,0,".")
plt.savefig("output_images/14_dst_points")
In [33]:
# Warp test images
# NOTE(review): `annotation` still holds the value set in an earlier cell
# ("combined sobel"), so the comparison titles are misleading here — consider
# setting it to e.g. "perspective transform".
test_images_warped = {}
for key in sorted(test_images_dst.keys()):
    test_images_warped[key] = warp(test_images_dst[key],src,dst)    
    plot_image_comparison(test_images_dst[key],test_images_warped[key],key, annotation,cmap_before=None,cmap_after=None)
plt.imsave("output_images/15_warped.png",test_images_warped["test_images/test2.jpg"])
In [34]:
# Warp combined binary (the bird's-eye binaries feed the lane-line search below)
# NOTE(review): `annotation` is stale here as well — the titles will not describe the warp.
test_images_warped_cb = {}
for key in sorted(test_images_dst.keys()):
    test_images_warped_cb[key] = warp(combined_binary[key],src,dst)    
    plot_image_comparison(test_images_warped[key],test_images_warped_cb[key],key, annotation,cmap_before=None,cmap_after="gray")
plt.imsave("output_images/16_warped_binary.png", test_images_warped_cb["test_images/test2.jpg"],cmap = "gray")

2.4 Identify lane-line pixels and fit with polynomial

In [35]:
# Search for lanes if no line fitted so far

def identify_lane_line_first(img):
    """
    Sliding-window lane-line search, used when no line has been fitted yet.

    Input:
    img (np.array): The warped binary input image
    Output:
    out_img (np.array): The output image containing the fitted windows in green,
                        the binary points contributing to the left line regression in red,
                        the binary points contributing to the right line regression in blue and 
                        the rest of the binary points in white
    left_fit (np.array): Vector of coefficients for fitted second degree polynomial for left line
    right_fit (np.array): Vector of coefficients for fitted second degree polynomial for right line
    ploty (np.array): A numpy array with the range of y-pixels as values
    left_fitx (np.array): Fitted 2nd degree polynomial points for all "ploty"-values for left line
    right_fitx (np.array): Fitted 2nd degree polynomial points for all "ploty"-values for right line
    lefty, righty, leftx, rightx (np.array): Positions of the pixels attributed to the
                        respective line
    leftx_dir_marker (bool): Determines curve direction of left lane - if True -> right turn, 
                        if False -> left turn
    rightx_dir_marker (bool): Determines curve direction of right lane - if True -> right turn, 
                        if False -> left turn
    """
    # Take a histogram of the bottom half of the image
    # (// instead of /: a float slice index raises TypeError under Python 3)
    histogram = np.sum(img[img.shape[0] // 2:, :], axis=0)

    # Create an output image to draw on and visualize the result
    out_img = np.dstack((img, img, img)) * 255

    # Find the peak of the left and right halves of the histogram
    # These will be the starting points for the left and right lines
    # (built-in int() replaces np.int, which was removed in NumPy 1.24)
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Choose the number of sliding windows
    nwindows = 8

    # Set height of windows
    window_height = int(img.shape[0] / nwindows)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Set the width of the windows +/- margin
    margin = 80
    # Set minimum number of pixels found to recenter window
    minpix = 60
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Dictionaries collecting window positions (already have the starting value in them)
    leftx_rep = {0: leftx_base}
    rightx_rep = {0: rightx_base}

    # Step through the windows one by one (first round) to identify the correct window
    # position: start from the previous position and recenter on the mean x of the
    # binary pixels found inside the initial window
    for window in range(nwindows):

        # Identify window boundaries in y direction
        win_y_low = img.shape[0] - (window + 1) * window_height
        win_y_high = img.shape[0] - window * window_height

        # Identify window boundaries in x direction for the left and right lines.
        # Use the current window position if already available (true for window 0,
        # seeded into leftx_rep/rightx_rep above) ...
        try:
            win_xleft_low = leftx_rep[window] - margin
            win_xleft_high = leftx_rep[window] + margin
            win_xright_low = rightx_rep[window] - margin
            win_xright_high = rightx_rep[window] + margin
        # ... otherwise fall back to the previous window as starting point.
        # (KeyError instead of a bare except, which also swallowed unrelated errors)
        except KeyError:
            win_xleft_low = leftx_rep[window - 1] - margin
            win_xleft_high = leftx_rep[window - 1] + margin
            win_xright_low = rightx_rep[window - 1] - margin
            win_xright_high = rightx_rep[window - 1] + margin

        # Identify the nonzero pixels in x and y within the currently identified window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]

        # If > minpix pixels were found, center the current left window on their mean x
        if len(good_left_inds) > minpix:
            leftx_rep[window] = int(np.mean(nonzerox[good_left_inds]))
        # Else keep the previous window's x position
        else:
            if (window != 0):
                leftx_rep[window] = leftx_rep[window - 1]
        # Same recentering logic for the right window
        if len(good_right_inds) > minpix:
            rightx_rep[window] = int(np.mean(nonzerox[good_right_inds]))
        else:
            if (window != 0):
                rightx_rep[window] = rightx_rep[window - 1]

    # Subtract the mean of all window positions from the starting window position to
    # determine the general turn direction: negative -> right turn, positive -> left turn
    leftx_dir = leftx_rep[0] - np.mean(list(leftx_rep.values()))
    rightx_dir = rightx_rep[0] - np.mean(list(rightx_rep.values()))

    # Direction markers: right turn = True, left turn = False, for both lines
    leftx_dir_marker = bool(leftx_dir < 0)
    rightx_dir_marker = bool(rightx_dir < 0)

    # Step through the windows one by one (second round) to harmonize windows that do
    # not conform with the general turn direction (probably biased by distortions)
    for window in range(nwindows):
        # Adjust all windows after the first one
        if window != 0:
            # Perform window adjustments for right turns
            if rightx_dir_marker:
                # A window left of its predecessor contradicts a right turn
                # Right line
                if rightx_rep[window] <= rightx_rep[window - 1]:
                    # Put window in the middle between previous and next window in x direction
                    try:
                        rightx_rep[window] = int(np.mean([rightx_rep[window - 1],
                                                          rightx_rep[window + 1]]))
                    # For the last window take the previous window's position
                    # (window + 1 does not exist -> KeyError)
                    except KeyError:
                        rightx_rep[window] = rightx_rep[window - 1]
                # Left line
                if leftx_rep[window] <= leftx_rep[window - 1]:
                    # Put window in the middle between previous and next window in x direction
                    try:
                        leftx_rep[window] = int(np.mean([leftx_rep[window - 1], leftx_rep[window + 1]]))
                    # For the last window take the previous window's position
                    except KeyError:
                        leftx_rep[window] = leftx_rep[window - 1]

            # Perform window adjustments for left turns
            else:
                # A window right of its predecessor contradicts a left turn
                # Right line
                if rightx_rep[window] >= rightx_rep[window - 1]:
                    # Put window in the middle between previous and next window in x direction
                    try:
                        rightx_rep[window] = int(np.mean([rightx_rep[window - 1],
                                                          rightx_rep[window + 1]]))
                    # For the last window take the previous window's position
                    except KeyError:
                        rightx_rep[window] = rightx_rep[window - 1]
                # Left line
                if leftx_rep[window] >= leftx_rep[window - 1]:
                    # Put window in the middle between previous and next window in x direction
                    try:
                        leftx_rep[window] = int(np.mean([leftx_rep[window - 1], leftx_rep[window + 1]]))
                    # For the last window take the previous window's position
                    except KeyError:
                        leftx_rep[window] = leftx_rep[window - 1]

        # Identify window boundaries in y direction
        win_y_low = img.shape[0] - (window + 1) * window_height
        win_y_high = img.shape[0] - window * window_height

        # Identify window boundaries in x direction for both lines
        win_xleft_low = leftx_rep[window] - margin
        win_xleft_high = leftx_rep[window] + margin
        win_xright_low = rightx_rep[window] - margin
        win_xright_high = rightx_rep[window] + margin

        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # Draw the final search windows onto the visualization image (green)
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 4)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 4)

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Fit a second order polynomial to each line (x as a function of y)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Generate x and y values for plotting
    ploty = np.array(range(0, img.shape[0]))
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    # Color the contributing pixels: left line red, right line blue
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    return out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, righty, leftx, rightx, leftx_dir_marker, rightx_dir_marker
In [36]:
# Search for lane lines around line found in previous picture
def identify_lane_line_cont(img, left_fit, right_fit):
    """
    Identifies lane-line pixels by searching within a fixed margin around the
    polynomials fitted in the previous frame (instead of a fresh sliding-window
    search) and fits new second degree polynomials to them.

    Input:
    img (np.array): The warped binary input image
    left_fit (np.array): Vector of coefficients for fitted second degree polynomial for left line
                            of previous image
    right_fit (np.array): Vector of coefficients for fitted second degree polynomial for right line
                            of previous image
    Output:
    out_img (np.array): The output image containing the search corridor in transparent green,
                        the binary points contributing to the left line regression in red,
                        the binary points contributing to the right line regression in blue and
                        the rest of the binary points in white
    left_fit (np.array): Vector of coefficients for fitted second degree polynomial for left line
    right_fit (np.array): Vector of coefficients for fitted second degree polynomial for right line
    ploty (np.array): A numpy array with the range of y-pixels as values
    left_fitx (np.array): Fitted 2nd degree polynomial points for all "ploty-values" for left line
                            for current image
    right_fitx (np.array): Fitted 2nd degree polynomial points for all "ploty-values" for right line
                            for current image
    lefty, righty, leftx, rightx (np.array): y/x coordinates of the binary pixels
                            attributed to the left/right line
    leftx_dir_marker (bool): Determines curve direction of left lane - if True -> right turn,
                        if False -> left turn
    rightx_dir_marker (bool): Determines curve direction of right lane - if True -> right turn,
                        if False -> left turn
    """
    
    
    # Coordinates of all nonzero (activated) pixels of the binary image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Half-width of the search corridor around the previous polynomial (pixels)
    margin = 80
    # Boolean masks selecting pixels within +/- margin of the previous frame's fits
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - 
                                   margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + 
                                                           left_fit[1]*nonzeroy + left_fit[2] + margin))) 
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] -
                                    margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + 
                                                            right_fit[1]*nonzeroy + right_fit[2] + margin)))  

    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each (note: rebinds the input parameter names)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    
    # Generate x and y values for plotting (one entry per image row)
    ploty = np.linspace(0, img.shape[0]-1, img.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    
    # Determine the direction of the turns by comparing the fitted x-position
    # at the top row of the image with the one at the bottom row
    
    # For left line
    left_line_indicator = left_fitx[0]-left_fitx[len(ploty)-1]
    # Right turn
    leftx_dir_marker = True
    # Left turn
    if left_line_indicator < 0:
        leftx_dir_marker = False
        
    # For right lane
    right_line_indicator = right_fitx[0]-right_fitx[len(ploty)-1]
    # Right turn
    rightx_dir_marker = True
    # Left turn
    if right_line_indicator < 0:
        rightx_dir_marker = False
        
    
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((img, img, img))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    # NOTE(review): the corridor is drawn around the NEW fit, while the pixel
    # selection above used the previous frame's fit - visualization only
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    # Blend the search corridor semi-transparently into the output image
    out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    
    return out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, righty, leftx, rightx, leftx_dir_marker, rightx_dir_marker
In [37]:
# Function to plot and save an example image
def plot_identified_lane_image(out_img, ploty, left_fitx, right_fitx, ending, xlim=(0, 1280), ylim=(720, 0)):
    """
    Plots a lane-detection image with the fitted polynomials overlaid in yellow
    and saves the figure to "output_images/<ending>.png".

    Input:
    out_img (np.array): Image to display in the background
    ploty (np.array): y-pixel positions at which the fitted x-values were evaluated
    left_fitx (np.array): Fitted x-values of the left line for all ploty values
    right_fitx (np.array): Fitted x-values of the right line for all ploty values
    ending (str): Filename (without extension) used for the saved figure
    xlim (tuple): x-axis limits; defaults to the project's warped image width
    ylim (tuple): y-axis limits, inverted by default so y grows downwards
                  as in image coordinates
    """
    plt.imshow(out_img)
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    plt.axis("off")
    # Save before plt.show(): show() clears the current figure
    plt.savefig("output_images/{}.png".format(ending))
    plt.show()
In [38]:
# Test of initial lane_line_identification (function identify_lane_line_first)
out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, righty, leftx, rightx,_,_ = identify_lane_line_first(
    test_images_warped_cb["test_images/test2.jpg"])

plot_identified_lane_image(out_img,ploty,left_fitx,right_fitx,"19_identified_lanes_first")

# Test of search around line found in previous picture (function identify_lane_line_cont) 
# -> Test currently performed with same image as initial image
out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, righty, leftx, rightx,_,_ = identify_lane_line_cont(
    test_images_warped_cb["test_images/test2.jpg"],left_fit, right_fit)

plot_identified_lane_image(out_img,ploty,left_fitx,right_fitx, "20_identified_lanes_second")
/home/wolfgang/anaconda3/envs/carnd-term1/lib/python3.5/site-packages/ipykernel/__main__.py:25: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future

2.5 Calculate radius of curvature and position of vehicle

In [39]:
def radius_and_position_warped(ploty,left_fit,right_fit):
    """
    Computes the curve radius (in pixels) of both lane-line polynomials,
    evaluated at the bottom of the image.

    Input:
    left_fit (np.array): Vector of coefficients for fitted second degree polynomial for left line
    right_fit (np.array): Vector of coefficients for fitted second degree polynomial for right line
    ploty (np.array): A numpy array with the range of y-pixels as values
    Output:
    left_curverad (float): Curve radius of the left line in pixel
    right_curverad (float): Curve radius of the right line in pixel
    """
    # Evaluate at the maximum y-value, i.e. the bottom of the image
    y_eval = np.max(ploty)

    def _curve_radius(fit):
        # Standard curvature radius formula for x = A*y^2 + B*y + C at y_eval
        return ((1 + (2 * fit[0] * y_eval + fit[1]) ** 2) ** 1.5) / np.absolute(2 * fit[0])

    return _curve_radius(left_fit), _curve_radius(right_fit)
In [40]:
def convert_radpos_to_real(ploty, lefty, righty, leftx, rightx):
    """
    ploty (np.array): A numpy array with the range of x-pixels as values
    left and right line position of pixels being attributed to respective line:
        lefty, righty, leftx, rightx
    Output:
    left_curverad (float): Curve radius for right turn in meter
    right_curverad (float): Curve radius for right turn in meter
    """
    # Choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)
    
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension

    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
    
    # Calculate the new radii of curvature
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # Now our radius of curvature is in meters
    return left_curverad, right_curverad
In [41]:
# Sign convention: if the car sits too far to the left, the offset is negative
def det_position_warped(img, ploty, left_fitx, right_fitx):
    """
    Determines the lateral position of the vehicle relative to the lane lines
    in pixels, assuming the camera is mounted at the horizontal center of the car.

    Input:
    img (np.array): The warped image for which the position should be identified
    ploty (np.array): A numpy array with the range of y-pixels as values
    left_fitx (np.array): Fitted 2nd degree polynomial points for all "ploty-values" for left line 
                            for current image
    right_fitx (np.array): Fitted 2nd degree polynomial points for all "ploty-values" for right line 
                            for current image
    Output:
    offset (int): Offset of the car in pixel. If value negative -> Car offset to the left
                                                If value positive -> Car is offset to the right
    line_base_pos_left (float): Distance from middle of car to left lane in pixel (positive value)
    line_base_pos_right (float): Distance from middle of car to right lane in pixel (positive value)
    """
    plotx = np.linspace(0, img.shape[1]-1, img.shape[1])
    
    # Horizontal middle pixel of the image (assumed vehicle/camera center)
    middle_img = np.median(plotx)
    
    # Evaluate both fits at the bottom row of the image (closest to the car);
    # hoisted once instead of recomputing int(ploty[-1]) for every use
    base_idx = int(ploty[-1])
    left_base = left_fitx[base_idx]
    right_base = right_fitx[base_idx]
    
    # Middle pixel between the two lane lines at the bottom of the image
    middle_lanes = left_base + (right_base - left_base)/2
    
    line_base_pos_left = middle_img - left_base
    line_base_pos_right = right_base - middle_img
    
    # Calculate and return offset
    offset = int(middle_img - middle_lanes)
    return offset, line_base_pos_left, line_base_pos_right
In [42]:
def convert_position_to_real(offset, line_base_pos_left, line_base_pos_right):
    """
    Converts the vehicle offset and the distances to both lane lines from
    pixels into meters.

    Input:
    offset (int): Offset of the car in pixel. If value negative -> Car offset to the left
                                                If value positive -> Car is offset to the right
    line_base_pos_left (int): Distance from middle of car to left lane in pixel (positive value)
    line_base_pos_right (int): Distance from middle of car to right lane in pixel (positive value)
    Output:
    offset (float): Offset of the car in meter (same sign convention as the input)
    line_base_pos_left (float): Distance from middle of car to left lane in meter (positive value)
    line_base_pos_right (float): Distance from middle of car to right lane in meter (positive value)
    """
    # Lane width assumption: 3.7 m spans ~700 pixels in the warped image
    xm_per_pix = 3.7 / 700

    # Scale all three pixel measurements by the same conversion factor
    offset_m, left_m, right_m = (xm_per_pix * value for value in
                                 (offset, line_base_pos_left, line_base_pos_right))
    return offset_m, left_m, right_m
In [43]:
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw

def print_data_to_image(img, left_curverad, right_curverad, offset, line_base_pos_left, line_base_pos_right):
    """
    Draws a white info box onto the image and prints the curve radii, the
    offset and the distances to both lane lines (all in meters) into it.

    Input:
    img (np.array): Image the text box is drawn onto (modified in place)
    left_curverad (float): Curve radius of the left line in meter
    right_curverad (float): Curve radius of the right line in meter
    offset (float): Offset of the car in meter (negative -> left of center)
    line_base_pos_left (float): Distance from vehicle center to left lane in meter
    line_base_pos_right (float): Distance from vehicle center to right lane in meter
    Output:
    img (np.array): The annotated image (same array object as the input)
    """
    img_shape = img.shape
    # Anchor the box at 1/20 of the image size from the top-left corner
    text_position = (int(img_shape[1]*1/20),int(img_shape[0]*1/20))
    # White background rectangle sized relative to the image dimensions
    polygon = np.array([(text_position[0], text_position[1]),
                        (text_position[0]+img_shape[1]*9/40, text_position[1]),
                        (text_position[0]+img_shape[1]*9/40, text_position[1]+img_shape[0]*3/20),
                        (text_position[0], text_position[1]+img_shape[0]*3/20)])
    cv2.fillPoly(img,np.int_([polygon]),(255,255,255),lineType = 4)
    # cv2.putText does not handle "\n", so render the text line by line
    y0 = text_position[1]+20
    dy = 20
    text = "Left curve radius:    {:08.2f}m\nRight curve radius:   {:08.2f}m\nOffset:              {:09.2f}m\nLeft lane:             {:08.2f}m\nRight lane:            {:08.2f}m".format(left_curverad,right_curverad,offset, line_base_pos_left, line_base_pos_right)
    for i, line in enumerate(text.split('\n')):
        y = y0 + i*dy
        cv2.putText(img, line, (text_position[0]+10, y ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0),1)
    return img

2.6 Transform lane view back to initial image

In [44]:
def warped_to_real(img_warped, img_dst, ploty, left_fitx, right_fitx, src, dst):
    """
    Projects the detected lane area from the warped (bird's-eye) view back
    onto the undistorted camera image.

    Input:
    img_warped (np.array): Warped binary image the lane was detected in
    img_dst (np.array): Undistorted original image to draw the lane onto
    ploty (np.array): y-pixel positions at which the fitted x-values were evaluated
    left_fitx (np.array): Fitted x-values of the left line for all ploty values
    right_fitx (np.array): Fitted x-values of the right line for all ploty values
    src (np.array): Source points used for the original perspective transform
    dst (np.array): Destination points used for the original perspective transform
    Output:
    out_img (np.array): img_dst with the lane area overlaid in transparent green
    """
    # Create inverse perspective transform (swapping dst and src reverses the warp)
    Minv = cv2.getPerspectiveTransform(dst, src)
    
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(img_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    # (left line top-to-bottom, right line bottom-to-top to close the polygon)
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    # Bug fix: output size must come from the img_dst parameter, not from a
    # global variable "image" that happened to be in scope
    newwarp = cv2.warpPerspective(color_warp, Minv, (img_dst.shape[1], img_dst.shape[0]))
    # Combine the result with the original image
    out_img = cv2.addWeighted(img_dst, 1, newwarp, 0.3, 0)
    return out_img
In [45]:
# Transform the lane detected on a warped test image back into the original
# (undistorted) perspective, display it and save it as an example output
result = warped_to_real(test_images_warped_cb["test_images/test2.jpg"], 
                        test_images_dst["test_images/test2.jpg"], ploty, 
                        left_fitx, right_fitx, src, dst)

plt.imshow(result)
plt.imsave("output_images/21_retransform.png",result)

3 Apply pipeline on video

3.1 Define class to store previous image values

In [46]:
class Line():
    """
    Stores lane-line detection results over the last `backlog` video frames so
    the pipeline can smooth over, or fall back on, previous detections.
    """

    def __init__(self, backlog):
        """
        Input:
        backlog (int): Number of data from previous images to be stored
        """        
        
        # how many previous frames are kept in the rolling lists below
        self.backlog = backlog
        # was the line detected in the last iteration?
        self.detected = False  
        # x values of the last n fits of the line
        self.recent_xfitted = [] 
        #average x values of the fitted line over the last n iterations
        self.bestx = None 
        #polynomial coefficients over the last n iterations
        self.recent_best_fit = []
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None  
        
        #currently not required as included in recent_best_fit
        #polynomial coefficients for the most recent fit
        #self.current_fit = [np.array([False])] 
        
        #radius of curvature of the line in meter
        self.radius_of_curvature = None 
        
        #radius of curvature over the last n iterations
        self.recent_radius_of_curvature = []
        
        # Offset from center of the line
        self.offset = None
        
        # Offset from center of the line over the last n iterations
        self.recent_offset = []
        
        # distance in meters of vehicle center from the line
        self.line_base_pos = None 
        
        # distance in meters of vehicle center from the line over the last n iterations
        self.recent_line_base_pos = []
        
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float') 
        
        # x values for detected line pixels
        self.allx = None  
        
        # y values for detected line pixels
        self.ally = None

    
    
    def update_value_collection(self, update_item, update_value):
        """
        Appends update_value to the list update_item and trims the oldest
        entry once the list exceeds the configured backlog.

        Input:
        update_item (list): stored list to be updated with a new value
        update_value: value with which the list should be updated
        Output:
        update_item (list): updated list
        """
        # Bug fix: the previous code assigned list.append()'s return value
        # (always None) to a dead variable; append mutates the list in place
        update_item.append(update_value)
        if len(update_item) > self.backlog:
            update_item = update_item[1:]
        return update_item

    
    def update(self,detected,recent_xfitted,recent_best_fit,radius_of_curvature,offset,line_base_pos, allx, ally):
        """
        Stores the results of the latest frame and refreshes the rolling
        collections and derived averages.

        Input:
        detected (bool): was the line detected in the last iteration?
        recent_xfitted (np.array): x values of the last fit of the line
        recent_best_fit (np.array): polynomial coefficients of the last iteration
        radius_of_curvature (float): radius of curvature of the line in meter
        offset (float): offset of the vehicle from the lane center in meter
        line_base_pos (float): distance in meters of vehicle center from the line
        allx (np.array): x values for detected line pixels
        ally (np.array): y values for detected line pixels                
        """
        self.detected = detected
        self.recent_xfitted = self.update_value_collection(self.recent_xfitted, recent_xfitted)
        # NOTE(review): np.mean without axis=0 reduces the stored arrays to a
        # single scalar; kept as-is to preserve behavior - confirm the intent
        self.bestx = np.mean(self.recent_xfitted)
        self.recent_best_fit = self.update_value_collection(self.recent_best_fit, recent_best_fit)
        self.best_fit = np.mean(self.recent_best_fit)
        self.radius_of_curvature = radius_of_curvature
        self.recent_radius_of_curvature = self.update_value_collection(self.recent_radius_of_curvature, radius_of_curvature)
        self.offset = offset
        self.recent_offset = self.update_value_collection(self.recent_offset,offset)
        self.line_base_pos = line_base_pos
        self.recent_line_base_pos = self.update_value_collection(self.recent_line_base_pos,line_base_pos)
        # Difference to the previous fit; until two fits are stored the lookup
        # raises IndexError (bug fix: was a bare except swallowing every error)
        try:
            self.diffs = self.recent_best_fit[-1] - self.recent_best_fit[-2]
        except IndexError:
            self.diffs = np.array([0,0,0], dtype='float')
        self.allx = allx
        self.ally = ally    

3.2 Define function to apply processing pipeline on image

In [47]:
def process_image(img, mtx, dist, src, dst):
    """
    Function to process full lane line identification pipeline on single image
    
    Undistorts and thresholds the image, warps it to a bird's-eye view,
    detects the lane lines, computes radii/offset in meters and projects the
    lane area back onto the undistorted image. Relies on the module-level Line
    instances "left_line" and "right_line" to carry state between frames.
    
    Input:
    img, mtx, dist, src, dst
    
    Output:
    out_img, left_curverad, right_curverad, offset, line_base_pos_left, line_base_pos_right
    (the numeric values are None if the detection was rejected as invalid)
    """
    
    

    # Image pipeline: undistort -> threshold to binary -> perspective warp
    img_dst = distortion_correction(img,mtx,dist)
    img_bin = img_to_thresh_bin(img = img_dst)
    img_warp = warp(img_bin,src,dst)
    
    # If both lines were detected in the last image start new search based on last image range
    if (left_line.detected == True) and (right_line.detected == True):
        out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, righty, leftx, rightx, leftx_dir_marker, rightx_dir_marker = identify_lane_line_cont(img_warp,left_line.recent_best_fit[-1], right_line.recent_best_fit[-1])
    # Else start new search
    else:
        out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, righty, leftx, rightx, leftx_dir_marker, rightx_dir_marker = identify_lane_line_first(img_warp)
    
    # Convert radii and vehicle position from pixel to real-world units (meters)
    left_curverad, right_curverad = convert_radpos_to_real(ploty, lefty, righty, leftx, rightx)
    offset, line_base_pos_left, line_base_pos_right = det_position_warped(out_img,ploty, left_fitx, right_fitx)
    offset, line_base_pos_left, line_base_pos_right = convert_position_to_real(offset, line_base_pos_left, line_base_pos_right)
    # Project the lane area back onto the undistorted image and print the stats
    out_img = warped_to_real(img_warp, img_dst, ploty, left_fitx, right_fitx, src, dst)
    out_img = print_data_to_image(out_img, left_curverad, right_curverad, offset, line_base_pos_left, line_base_pos_right)
    
    
    # Conditions for non-detection of lanes
    # If the lane is wider than 4m or narrower than 2.8m this is unrealistic for lane lines
    condition1 = line_base_pos_left + line_base_pos_right >= 4 or line_base_pos_left + line_base_pos_right <= 2.8
    # If lane lines are pointing in different directions and one of the curve radiuses is smaller 1000m
    # -> If the lanes are straight they might very well have different directions
    condition2 = (leftx_dir_marker!= rightx_dir_marker) and (left_curverad < 1000 or right_curverad < 1000)
    # Strong differences in curve radius at small turns
    # NOTE(review): "(left - right) * (-1) > 100" only flags right_curverad
    # exceeding left_curverad by 100m, not the opposite case - confirm intent
    condition3 = (left_curverad < 1000 and right_curverad < 1000) and (left_curverad - right_curverad) * (-1) > 100
    
    # Determine whether detected lane lines are valid based on conditions
    if condition1 or condition2 or condition3:
        detected = False
        out_img = img_dst

    else:
        detected = True
        
    # Perform required updates depending on whether identified lane lines are valid or not
    # If valid, update Line class values accordingly
    if detected:
        left_line.update(detected,left_fitx,left_fit,left_curverad,offset,line_base_pos_left, leftx, lefty)
        right_line.update(detected,right_fitx,right_fit,right_curverad,offset,line_base_pos_right, rightx, righty)
    # If not valid respond to line-class, that lines were not identified
    else:
        left_line.detected = False
        right_line.detected = False
        
        # Create an outputimage with average of previous 3 successful detected image lanes (try-statement for first three images)
        # NOTE(review): the bare except below silently drops ALL errors, not only
        # the expected "not enough history yet" case - consider narrowing it
        try:
            # Define average values
            left_fitx_avg = np.mean(left_line.recent_xfitted[-3:-1],axis = 0)
            right_fitx_avg = np.mean(right_line.recent_xfitted[-3:-1],axis = 0)
            left_curverad_avg = np.mean(left_line.recent_radius_of_curvature[-3:-1],axis = 0)
            right_curverad_avg =  np.mean(right_line.recent_radius_of_curvature[-3:-1],axis = 0)
            offset_avg =  np.mean(left_line.recent_offset[-3:-1],axis = 0)
            line_base_pos_left_avg = np.mean(left_line.recent_line_base_pos[-3:-1],axis = 0)
            # Bug fix: the right-lane average previously read left_line's history
            line_base_pos_right_avg = np.mean(right_line.recent_line_base_pos[-3:-1],axis = 0)
            
            # Create output image based on average values
            out_img = warped_to_real(img_warp, img_dst, ploty, left_fitx_avg, right_fitx_avg, src, dst)
            out_img = print_data_to_image(out_img, left_curverad_avg, right_curverad_avg, offset_avg, 
                                          line_base_pos_left_avg, line_base_pos_right_avg)
        except:
            pass
    
    if detected:
        return out_img, left_curverad, right_curverad, offset, line_base_pos_left, line_base_pos_right
    else:
        return out_img, None, None, None, None, None

3.3 Process video

In [48]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
In [49]:
# Initialize classes to track values for left and right lane detection
# (module-level state shared with process_image across frames)
left_line = Line(backlog = 5)
right_line = Line(backlog = 5)

# Define a function which only returns the image output of the function "process_image()"
# (moviepy's fl_image expects a callable mapping image -> image)
def process_image_video(img):
    """
    Wrapper around process_image() for video processing; uses the module-level
    mtx, dist, src and dst and discards all numeric return values.
    """
    result,_,_,_,_,_ = process_image(img, mtx, dist, src, dst)

    return result
In [50]:
# Process the video: apply the lane-detection pipeline to every frame
video_output = 'project_video_annotated.mp4'
video_input = VideoFileClip("project_video.mp4")
white_clip = video_input.fl_image(process_image_video) #NOTE: this function expects color images!!
%time white_clip.write_videofile(video_output, audio=False)
[MoviePy] >>>> Building video project_video_annotated.mp4
[MoviePy] Writing video project_video_annotated.mp4
100%|█████████▉| 1260/1261 [06:51<00:00,  3.10it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: project_video_annotated.mp4 

CPU times: user 9min 27s, sys: 2.63 s, total: 9min 30s
Wall time: 6min 53s

Testing functions (only relevant for testing if pipeline is modified)

In [ ]:
# Initialize classes to track values for left and right lane detection
# (fresh instances so test runs do not reuse video state)
left_line = Line(backlog = 5)
right_line = Line(backlog = 5)


# Dictionaries collecting the pipeline results per test image path
processed_img = {}
left_curverad = {}
right_curverad = {}
offset = {}
line_base_pos_left = {}
line_base_pos_right = {}

# Run the full pipeline on every test image (sorted for a stable order)
for key in sorted(test_images_dst.keys()):
    processed_img[key],left_curverad[key] ,right_curverad[key],offset[key], line_base_pos_left[key], line_base_pos_right[key] = process_image(test_images[key],mtx,dist,src,dst)    
    print(offset[key])
    #plot_image_comparison(test_images[key],processed_img[key],
                          #("{}\nleft curverad: {:.2f}m\nright curverad: {:.2f}m\noffset: {:.2f}m".format(key,left_curverad[key],left_curverad[key],offset[key])), 
                          #annotation,cmap_before=None,cmap_after="gray")
plt.imsave("test.png",processed_img["test_images/test4.jpg"])
In [ ]:
 # Initialize classes to track values for left and right lane detection
left_line = Line(backlog = 5)
right_line = Line(backlog = 5)
processed_img,left_curverad ,right_curverad,offset, line_base_pos_left, line_base_pos_right = process_image(
    test_images["test_images/test4.jpg"],mtx,dist,src,dst)